const runtime.pageSize
73 uses
runtime (current package)
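pageSize is the runtime's heap page size; malloc.go defines it as `pageSize = _PageSize`, where `_PageSize = 1 << _PageShift`. Below is a minimal standalone sketch of the derived values, assuming the linux/amd64 constants (the target this page was generated for); the names mirror the runtime's but this is illustration, not runtime code.

```go
package main

import "fmt"

// Standalone stand-ins for the runtime constants (illustration only).
// _PageShift is 13 in the runtime, and heapArenaBytes is 64 MiB on
// linux/amd64, the target this page was generated for.
const (
	pageShift = 13             // mirrors _PageShift
	pageSize  = 1 << pageShift // mirrors _PageSize: 8192 bytes (8 KiB)

	heapArenaBytes = 64 << 20                  // 64 MiB heap arenas
	pagesPerArena  = heapArenaBytes / pageSize // 8192 pages per arena (malloc.go#L260)
)

func main() {
	fmt.Println(pageSize, pagesPerArena) // 8192 8192
}
```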
arena.go#L194: userArenaChunkPages = userArenaChunkBytes / pageSize
arena.go#L204: if userArenaChunkPages*pageSize != userArenaChunkBytes {
arena.go#L692: asanpoison(unsafe.Pointer(span.limit), span.npages*pageSize-span.elemsize)
arena.go#L761: if s.npages*pageSize != userArenaChunkBytes {
arena.go#L782: sysFault(unsafe.Pointer(s.base()), s.npages*pageSize)
arena.go#L787: gcController.heapInUse.add(-int64(s.npages * pageSize))
arena.go#L793: gcController.totalFree.Add(int64(s.npages * pageSize))
arena.go#L800: atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
arena.go#L801: atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
arena.go#L803: atomic.Xadd64(&stats.largeFree, int64(s.npages*pageSize))
arena.go#L807: gcController.update(-int64(s.npages*pageSize), 0)
arena.go#L842: if s.npages*pageSize != userArenaChunkBytes {
malloc.go#L118: pageSize = _PageSize
malloc.go#L260: pagesPerArena = heapArenaBytes / pageSize
mbitmap.go#L732: size := s.npages * pageSize
mbitmap.go#L742: size := s.npages * pageSize
mbitmap.go#L1351: pages := divRoundUp(bitmapBytes, pageSize)
mcache.go#L212: gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
mcache.go#L241: atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
mcache.go#L246: gcController.totalAlloc.Add(int64(npages * pageSize))
mcache.go#L249: gcController.update(int64(s.npages*pageSize), 0)
mgcscavenge.go#L130: maxPagesPerPhysPage = maxPhysPageSize / pageSize
mgcscavenge.go#L739: maxPages := max / pageSize
mgcscavenge.go#L740: if max%pageSize != 0 {
mgcscavenge.go#L749: minPages := physPageSize / pageSize
mgcscavenge.go#L763: addr := chunkBase(ci) + uintptr(base)*pageSize
mgcscavenge.go#L784: sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
mgcscavenge.go#L788: nbytes := int64(npages * pageSize)
mgcscavenge.go#L811: return uintptr(npages) * pageSize
mgcscavenge.go#L971: if physHugePageSize > pageSize && physHugePageSize > physPageSize {
mgcscavenge.go#L978: pagesPerHugePage := uintptr(physHugePageSize / pageSize)
mgcscavenge.go#L1113: newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
mgcscavenge.go#L1167: addr := chunkBase(ci) + uintptr(page+npages-1)*pageSize
mgcwork.go#L27: if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
mgcwork.go#L378: s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
mheap.go#L708: return ha.spans[(p/pageSize)%pagesPerArena]
mheap.go#L719: return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
mheap.go#L746: pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
mheap.go#L747: pageMask = byte(1 << ((p / pageSize) % 8))
mheap.go#L927: traceGCSweepSpan((n0 - nFreed) * pageSize)
mheap.go#L1000: p := base / pageSize
mheap.go#L1006: ai = arenaIndex(base + n*pageSize)
mheap.go#L1048: arenaLimit := arenaBase + npage*pageSize
mheap.go#L1071: npage -= (arenaLimit - arenaBase) / pageSize
mheap.go#L1184: needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
mheap.go#L1218: extraPages := physPageSize / pageSize
mheap.go#L1347: nbytes := npages * pageSize
mheap.go#L1388: nbytes := npages * pageSize
mheap.go#L1392: s.limit = s.base() + s.npages*pageSize
mheap.go#L1472: ask := alignUp(npage, pallocChunkPages) * pageSize
mheap.go#L1619: nbytes := s.npages * pageSize
mheap.go#L1818: arenaPage := (s.base() / pageSize) % pagesPerArena
mheap.go#L1826: arenaPage := (s.base() / pageSize) % pagesPerArena
mpagealloc.go#L59: pallocChunkBytes = pallocChunkPages * pageSize
mpagealloc.go#L118: return uint(p % pallocChunkBytes / pageSize)
mpagealloc.go#L428: p.update(base, size/pageSize, true, false)
mpagealloc.go#L489: limit := base + npages*pageSize - 1
mpagealloc.go#L576: limit := base + npages*pageSize - 1
mpagealloc.go#L605: return uintptr(scav) * pageSize
mpagealloc.go#L775: foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
mpagealloc.go#L813: addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
mpagealloc.go#L854: addr := chunkBase(ci) + uintptr(j)*pageSize
mpagealloc.go#L858: searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
mpagealloc.go#L897: addr = chunkBase(i) + uintptr(j)*pageSize
mpagealloc.go#L898: searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
mpagealloc.go#L943: limit := base + npages*pageSize - 1
mpagecache.go#L46: return c.base + i*pageSize, uintptr(scav) * pageSize
mpagecache.go#L66: return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
mpagecache.go#L138: base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
mpagecache.go#L155: base: alignDown(addr, 64*pageSize),
mpagecache.go#L181: p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
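Two arithmetic patterns recur throughout the uses above: converting a span's page count to bytes (`s.npages * pageSize`) and mapping an address to a page index by dividing by pageSize and reducing modulo a container size (per arena in mheap.go, per chunk in mpagealloc.go). The sketch below shows both with standalone stand-ins for the runtime constants; the values correspond to linux/amd64 on a 64-bit platform and are reproduced for illustration only.

```go
package main

import "fmt"

// Stand-ins for the runtime constants used in the listing above
// (linux/amd64 values; illustration only, not runtime code).
const (
	pageSize         = 8192
	heapArenaBytes   = 64 << 20
	pagesPerArena    = heapArenaBytes / pageSize   // malloc.go#L260
	pallocChunkPages = 512
	pallocChunkBytes = pallocChunkPages * pageSize // mpagealloc.go#L59
)

func main() {
	// Span size in bytes, as in mheap.go#L1347: nbytes := npages * pageSize.
	npages := uintptr(3)
	fmt.Println(npages * pageSize) // 24576

	// Page index within a heap arena, as in mheap.go#L708:
	// ha.spans[(p/pageSize)%pagesPerArena].
	p := uintptr(0xc000012000) // a typical 64-bit heap address
	fmt.Println((p / pageSize) % pagesPerArena) // 9

	// Page index within a palloc chunk, as in mpagealloc.go#L118:
	// uint(p % pallocChunkBytes / pageSize).
	fmt.Println(uint(p % pallocChunkBytes / pageSize)) // 9
}
```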
The pages are generated with Golds v0.6.7. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.